[IA64] Fixes for 4k page support.
author     Alex Williamson <alex.williamson@hp.com>
           Tue, 28 Aug 2007 18:30:31 +0000 (12:30 -0600)
committer  Alex Williamson <alex.williamson@hp.com>
           Tue, 28 Aug 2007 18:30:31 +0000 (12:30 -0600)
Some code depends on PAGE_SIZE and shouldn't be changed to use the per-vcpu
VHPT page size (v->arch.vhpt_pg_shift): the dirty page logging bitmap is
allocated in PAGE_SIZE units as part of the xen_domctl_shadow_op ABI, and TLB
insert tracking is likewise done per PAGE_SIZE page.  A sketch of the bitmap
indexing follows the file list below.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
xen/arch/ia64/xen/faults.c
xen/arch/ia64/xen/vhpt.c
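
Why PAGE_SHIFT and not v->arch.vhpt_pg_shift in the dirty-log path: the bitmap
reported through xen_domctl_shadow_op carries one bit per PAGE_SIZE frame, so a
frame number computed with a smaller vhpt_pg_shift would be at the wrong
granularity and could run past the end of the bitmap.  Below is a minimal
sketch of that indexing, not the Xen implementation; the helper name, bitmap
layout and the 4K PAGE_SHIFT are assumptions for illustration only.

#include <stdint.h>
#include <limits.h>

#define PAGE_SHIFT      12                          /* assumed: 4K pages */
#define BITS_PER_LONG   (CHAR_BIT * sizeof(unsigned long))

/* Hypothetical helper: mark the PAGE_SIZE frame containing maddr dirty. */
static inline void mark_dirty(unsigned long *dirty_bitmap,
                              unsigned long nr_frames, uint64_t maddr)
{
        /* One bit per PAGE_SIZE frame, so shift by PAGE_SHIFT rather
         * than the (possibly smaller) per-vcpu vhpt_pg_shift.  */
        unsigned long gpfn = maddr >> PAGE_SHIFT;

        if (gpfn < nr_frames)
                dirty_bitmap[gpfn / BITS_PER_LONG] |=
                        1UL << (gpfn % BITS_PER_LONG);
}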

index aa6b29d0f05a81bf01e261ee59e1118edc22c65d..296f50326ee960f3efcdd355c1fb50bfd7a2a59a 100644
@@ -729,6 +729,17 @@ ia64_shadow_fault(unsigned long ifa, unsigned long itir,
        unsigned long pte = 0;
        struct vhpt_lf_entry *vlfe;
 
+       /*
+        * v->arch.vhpt_pg_shift shouldn't be used here.
+        * Currently the dirty page logging bitmap is allocated based
+        * on PAGE_SIZE.  This is part of the xen_domctl_shadow_op ABI.
+        * If we want to log dirty pages at a finer granularity when
+        * v->arch.vhpt_pg_shift < PAGE_SHIFT, we have to revise the
+        * ABI and update this function and the related tool stack
+        * (live relocation).
+        */
+       unsigned long vhpt_pg_shift = PAGE_SHIFT;
+
        /* There are 2 jobs to do:
           -  marking the page as dirty (the metaphysical address must be
              extracted to do that).
@@ -744,7 +755,7 @@ ia64_shadow_fault(unsigned long ifa, unsigned long itir,
        if (vlfe->ti_tag == ia64_ttag(ifa)) {
                /* The VHPT entry is valid.  */
                gpfn = get_gpfn_from_mfn((pte & _PAGE_PPN_MASK) >>
-                                        v->arch.vhpt_pg_shift);
+                                        vhpt_pg_shift);
                BUG_ON(gpfn == INVALID_M2P_ENTRY);
        } else {
                unsigned long itir, iha;
@@ -760,10 +771,10 @@ ia64_shadow_fault(unsigned long ifa, unsigned long itir,
                /* Try again!  */
                if (fault != IA64_NO_FAULT) {
                        /* This will trigger a dtlb miss.  */
-                       ia64_ptcl(ifa, v->arch.vhpt_pg_shift << 2);
+                       ia64_ptcl(ifa, vhpt_pg_shift << 2);
                        return;
                }
-               gpfn = ((pte & _PAGE_PPN_MASK) >> v->arch.vhpt_pg_shift);
+               gpfn = ((pte & _PAGE_PPN_MASK) >> vhpt_pg_shift);
                if (pte & _PAGE_D)
                        pte |= _PAGE_VIRT_D;
        }
@@ -791,7 +802,7 @@ ia64_shadow_fault(unsigned long ifa, unsigned long itir,
                        /* Purge the TC locally.
                           It will be reloaded from the VHPT iff the
                           VHPT entry is still valid.  */
-                       ia64_ptcl(ifa, v->arch.vhpt_pg_shift << 2);
+                       ia64_ptcl(ifa, vhpt_pg_shift << 2);
 
                        atomic64_inc(&d->arch.shadow_fault_count);
                } else {
@@ -803,6 +814,6 @@ ia64_shadow_fault(unsigned long ifa, unsigned long itir,
                /* We don't know whether or not the fault must be
                   reflected.  The VHPT entry is not valid.  */
                /* FIXME: in metaphysical mode, we could do an ITC now.  */
-               ia64_ptcl(ifa, v->arch.vhpt_pg_shift << 2);
+               ia64_ptcl(ifa, vhpt_pg_shift << 2);
        }
 }
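
A note on the ia64_ptcl() call sites above: ptc.l takes the log2 of the purge
size in bits {7:2} of its size operand, which is why the shift count is passed
as vhpt_pg_shift << 2.  A tiny illustrative calculation, assuming 4K pages
(PAGE_SHIFT == 12); not part of the patch:

#include <stdio.h>

int main(void)
{
        unsigned long page_shift = 12;             /* assumed: 4K pages */
        unsigned long ptcl_size  = page_shift << 2;

        /* log2(4096) = 12 placed in bits {7:2} => operand 0x30 */
        printf("ptc.l size operand = %#lx\n", ptcl_size);
        return 0;
}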
index 5812c73ba018adbd3f37e5564929a63283502129..16d188c2f4e34db57f4be86d9a9607bfeca1a27c 100644
@@ -384,7 +384,12 @@ __domain_flush_vtlb_track_entry(struct domain* d,
        int cpu;
        int vcpu;
        int local_purge = 1;
-       unsigned char ps = current->arch.vhpt_pg_shift;
+
+       /* tlb insert tracking is done in PAGE_SIZE units. */
+       unsigned char ps = max_t(unsigned char,
+                                current->arch.vhpt_pg_shift, PAGE_SHIFT);
+       /* This case isn't supported (yet). */
+       BUG_ON(current->arch.vhpt_pg_shift > PAGE_SHIFT);
        
        BUG_ON((vaddr >> VRN_SHIFT) != VRN7);
        /*